Loading libraries¶

In [ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import tensorflow as tf
import seaborn as sns
sns.set()
from PIL import Image
import cv2
import os
import scipy as sp
import IPython
from tensorflow.keras.utils import to_categorical
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import mean_absolute_error, confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
from keras.models import Model
from tensorflow.keras import layers
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.models import clone_model
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import plot_model
from tensorflow.keras import regularizers
from tensorflow.keras import initializers
from tensorflow.keras.models import load_model

Mounting drive

In [ ]:
from google.colab import drive
drive.mount('/content/drive',force_remount=True)
Mounted at /content/drive

Reading masked data¶

In [ ]:
#Loading the mask superimposed UTK dataset
df_final = pd.read_csv('/content/drive/MyDrive/OCCLUDED.csv')
In [ ]:
df_final.head()
Out[ ]:
age gender race pixels
0 49 1 4 250 237 222 205 185 167 178 151 128 171 137 10...
1 50 0 0 102 85 59 93 75 50 72 54 32 74 53 34 103 72 45...
2 50 0 0 97 62 36 109 72 46 135 102 80 189 165 148 242 ...
3 50 0 0 59 107 131 62 104 122 69 102 112 80 108 112 13...
4 50 0 0 0 9 6 1 5 11 1 1 10 13 10 16 25 24 21 33 26 15...
In [ ]:
#labels of ethncity
df_final=df_final.loc[(df_final.race==0 ) | ( df_final.race==1) | ( df_final.race==2) | ( df_final.race==3) | (df_final.race==4)]
In [ ]:
#0 - represents white, 1 - represents black,2 - represents asian,3 - represents indian and 4 - represents others
df_final.race.value_counts()
Out[ ]:
0    8815
1    4109
3    3468
2    2573
4    1402
Name: race, dtype: int64
In [ ]:
# 0 represents male and 1 represents female
df_final.gender.value_counts()
Out[ ]:
0    10492
1     9875
Name: gender, dtype: int64
In [ ]:
#Getting the number of people belonging to each age
df_final.age.value_counts()
Out[ ]:
26     2095
28      893
35      866
24      833
25      714
       ... 
116       3
101       2
91        2
111       1
68        1
Name: age, Length: 95, dtype: int64
In [ ]:
df_final['pixels'] = df_final['pixels'].apply(lambda x: np.array(x.split(), dtype="float32"))
In [ ]:
# normalizing pixels data
df_final['pixels'] = df_final['pixels'].apply(lambda x: x/255)
df_final = df_final.sample(frac=1).reset_index(drop=True)
In [ ]:
X = np.array(df_final['pixels'].tolist())
df_final
Out[ ]:
age gender race pixels
0 90 0 0 [0.7490196, 0.3372549, 0.2784314, 0.67058825, ...
1 9 1 3 [0.99607843, 1.0, 1.0, 0.9882353, 0.99607843, ...
2 18 1 0 [0.2627451, 0.22352941, 0.21568628, 0.24313726...
3 60 0 0 [0.2, 0.14901961, 0.14901961, 0.24313726, 0.20...
4 36 0 0 [0.42745098, 0.34901962, 0.28627452, 0.5764706...
... ... ... ... ...
20362 22 1 2 [0.9098039, 0.8666667, 0.79607844, 0.8745098, ...
20363 4 1 2 [0.039215688, 0.0627451, 0.047058824, 0.047058...
20364 43 0 0 [0.59607846, 0.5294118, 0.46666667, 0.5568628,...
20365 40 0 0 [0.5137255, 0.27450982, 0.19215687, 0.5411765,...
20366 26 1 1 [0.64705884, 0.6156863, 0.5647059, 0.60784316,...

20367 rows × 4 columns

In [ ]:
X = X.reshape(X.shape[0],50,50,3)
In [ ]:
y_new = np.array(df_final[['gender', 'race', 'age']])
print(y_new)
[[ 0  0 90]
 [ 1  3  9]
 [ 1  0 18]
 ...
 [ 0  0 43]
 [ 0  0 40]
 [ 1  1 26]]
In [ ]:
# Splitting the data
# Training-60%, Validation-20%, Testing-20%
X_train, X_test, y_train, y_test = train_test_split(X, y_new, test_size=0.2, random_state=42)
X_train, X_cv, y_train, y_cv = train_test_split(X_train,y_train,test_size = 0.25,train_size =0.75,random_state=42)
In [ ]:
# Segregating the labels into different arrays
y_gender_train = y_train[:,0]
y_gender_test = y_test[:,0]
y_gender_cv = y_cv[:,0]
y_ethnicity_train = y_train[:,1]
y_ethnicity_test = y_test[:,1]
y_ethnicity_cv = y_cv[:,1]
y_age_train = y_train[:,2]
y_age_test = y_test[:,2]
y_age_cv = y_cv[:,2]
In [ ]:
#Finding the position to slice
eth_train_len = len(y_ethnicity_train)
eth_cv_len=len(y_ethnicity_cv)
y_ethnicity_concat = np.concatenate((y_ethnicity_train,y_ethnicity_cv, y_ethnicity_test))
y_ethnicity_concat = y_ethnicity_concat.astype(np.uint8)
y_ethnicity = to_categorical(y_ethnicity_concat)
In [ ]:
#One hot encoding- For example if a person is of ethnicity 'Indian' - 3, we one hot encode it so that it is saves as [0 0 0 1 0]. The '1' in the third position indicates that the ethnicity of the person in the image is 'Indian'.
y_ethnicity_train = y_ethnicity[:eth_train_len]
y_ethnicity_cv=y_ethnicity[eth_train_len:(eth_train_len+eth_cv_len)]
y_ethnicity_test = y_ethnicity[(eth_train_len+eth_cv_len):]

VGG16 Model¶

In [ ]:
inputs = tf.keras.Input(shape=(50, 50, 3))
x = inputs

x = Conv2D(160, kernel_size=(3,3),padding='same')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.LeakyReLU(0.3)(x)

x = tf.keras.layers.MaxPool2D()(x)

x = Conv2D(192, kernel_size=(3,3),padding='same')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.LeakyReLU(0.3)(x)

x = tf.keras.layers.AvgPool2D()(x)

x3 = Conv2D(224, kernel_size=(3,3),padding='same')(x)

x3 = tf.keras.layers.BatchNormalization()(x3)
x3 = tf.keras.layers.LeakyReLU(0.1)(x3)
x3 = tf.keras.layers.AvgPool2D()(x3)

x2 = Conv2D(224, kernel_size=(3,3),padding='same')(x3)

x2 = tf.keras.layers.BatchNormalization()(x2)
x2 = tf.keras.layers.LeakyReLU(0.1)(x2)
x2 = tf.keras.layers.MaxPool2D()(x2)

x1 = Conv2D(224, kernel_size=(3,3),padding='same')(x2)

x1 = tf.keras.layers.BatchNormalization()(x1)
x1 = tf.keras.layers.LeakyReLU(0.1)(x1)
x1 = tf.keras.layers.MaxPool2D()(x1)

x1 = layers.Flatten()(x1)
x1 = layers.Dense(1000,activation='relu')(x1)    
# x1 = tf.keras.layers.Dropout(0.5)(x1)           
x1 = layers.Dense(1000,activation='relu')(x1)


x2 = layers.Flatten()(x2)
x2 = layers.Dense(1000,activation='relu')(x2)    
# x2 = tf.keras.layers.Dropout(0.5)(x2)           
x2 = layers.Dense(1000,activation='relu')(x2)

x3 = layers.Flatten()(x3)
x3 = tf.keras.layers.Dense(3096, activation='relu',kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01))(x3)
x3 = tf.keras.layers.Dropout(0.5)(x3)
x3 = tf.keras.layers.Dense(3096, activation='relu', kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01))(x3)
x3= tf.keras.layers.Dropout(0.5)(x3)



#bottleneck
out_gender = layers.Dense(1, activation='sigmoid', name='gender_out')(x2) ## output binaire[0]
out_ethnicity = layers.Dense(5, activation='softmax', name='ethnicity_out')(x1) ## output catégoriel[0,1,0,0,0]
out_age=layers.Dense(1, name='age_out')(x3) ## output continue[42]


model = tf.keras.Model(inputs=inputs, outputs=[out_gender, out_ethnicity, out_age])

##Compiling Model
model.compile(
      optimizer='Adam',
        loss={'gender_out':'BinaryCrossentropy',
              'ethnicity_out':'categorical_crossentropy',
              'age_out':'mse'},
        metrics={'gender_out':'accuracy',
                 'ethnicity_out':'accuracy',
                 'age_out':'mae'}) 
                  # change this ti dense net, model has 3 outputs: gender,age,ethnicity
model.summary()
Model: "model_6"
__________________________________________________________________________________________________
 Layer (type)                   Output Shape         Param #     Connected to                     
==================================================================================================
 input_1 (InputLayer)           [(None, 50, 50, 3)]  0           []                               
                                                                                                  
 conv2d (Conv2D)                (None, 50, 50, 160)  4480        ['input_1[0][0]']                
                                                                                                  
 batch_normalization (BatchNorm  (None, 50, 50, 160)  640        ['conv2d[0][0]']                 
 alization)                                                                                       
                                                                                                  
 leaky_re_lu (LeakyReLU)        (None, 50, 50, 160)  0           ['batch_normalization[0][0]']    
                                                                                                  
 max_pooling2d (MaxPooling2D)   (None, 25, 25, 160)  0           ['leaky_re_lu[0][0]']            
                                                                                                  
 conv2d_1 (Conv2D)              (None, 25, 25, 192)  276672      ['max_pooling2d[0][0]']          
                                                                                                  
 batch_normalization_1 (BatchNo  (None, 25, 25, 192)  768        ['conv2d_1[0][0]']               
 rmalization)                                                                                     
                                                                                                  
 leaky_re_lu_1 (LeakyReLU)      (None, 25, 25, 192)  0           ['batch_normalization_1[0][0]']  
                                                                                                  
 average_pooling2d (AveragePool  (None, 12, 12, 192)  0          ['leaky_re_lu_1[0][0]']          
 ing2D)                                                                                           
                                                                                                  
 conv2d_2 (Conv2D)              (None, 12, 12, 224)  387296      ['average_pooling2d[0][0]']      
                                                                                                  
 batch_normalization_2 (BatchNo  (None, 12, 12, 224)  896        ['conv2d_2[0][0]']               
 rmalization)                                                                                     
                                                                                                  
 leaky_re_lu_2 (LeakyReLU)      (None, 12, 12, 224)  0           ['batch_normalization_2[0][0]']  
                                                                                                  
 average_pooling2d_1 (AveragePo  (None, 6, 6, 224)   0           ['leaky_re_lu_2[0][0]']          
 oling2D)                                                                                         
                                                                                                  
 conv2d_3 (Conv2D)              (None, 6, 6, 224)    451808      ['average_pooling2d_1[0][0]']    
                                                                                                  
 batch_normalization_3 (BatchNo  (None, 6, 6, 224)   896         ['conv2d_3[0][0]']               
 rmalization)                                                                                     
                                                                                                  
 leaky_re_lu_3 (LeakyReLU)      (None, 6, 6, 224)    0           ['batch_normalization_3[0][0]']  
                                                                                                  
 max_pooling2d_1 (MaxPooling2D)  (None, 3, 3, 224)   0           ['leaky_re_lu_3[0][0]']          
                                                                                                  
 conv2d_4 (Conv2D)              (None, 3, 3, 224)    451808      ['max_pooling2d_1[0][0]']        
                                                                                                  
 batch_normalization_4 (BatchNo  (None, 3, 3, 224)   896         ['conv2d_4[0][0]']               
 rmalization)                                                                                     
                                                                                                  
 leaky_re_lu_4 (LeakyReLU)      (None, 3, 3, 224)    0           ['batch_normalization_4[0][0]']  
                                                                                                  
 flatten_2 (Flatten)            (None, 8064)         0           ['average_pooling2d_1[0][0]']    
                                                                                                  
 max_pooling2d_2 (MaxPooling2D)  (None, 1, 1, 224)   0           ['leaky_re_lu_4[0][0]']          
                                                                                                  
 dense_4 (Dense)                (None, 3096)         24969240    ['flatten_2[0][0]']              
                                                                                                  
 flatten_1 (Flatten)            (None, 2016)         0           ['max_pooling2d_1[0][0]']        
                                                                                                  
 flatten (Flatten)              (None, 224)          0           ['max_pooling2d_2[0][0]']        
                                                                                                  
 dropout (Dropout)              (None, 3096)         0           ['dense_4[0][0]']                
                                                                                                  
 dense_2 (Dense)                (None, 1000)         2017000     ['flatten_1[0][0]']              
                                                                                                  
 dense (Dense)                  (None, 1000)         225000      ['flatten[0][0]']                
                                                                                                  
 dense_5 (Dense)                (None, 3096)         9588312     ['dropout[0][0]']                
                                                                                                  
 dense_3 (Dense)                (None, 1000)         1001000     ['dense_2[0][0]']                
                                                                                                  
 dense_1 (Dense)                (None, 1000)         1001000     ['dense[0][0]']                  
                                                                                                  
 dropout_1 (Dropout)            (None, 3096)         0           ['dense_5[0][0]']                
                                                                                                  
 gender_out (Dense)             (None, 1)            1001        ['dense_3[0][0]']                
                                                                                                  
 ethnicity_out (Dense)          (None, 5)            5005        ['dense_1[0][0]']                
                                                                                                  
 age_out (Dense)                (None, 1)            3097        ['dropout_1[0][0]']              
                                                                                                  
==================================================================================================
Total params: 40,386,815
Trainable params: 40,384,767
Non-trainable params: 2,048
__________________________________________________________________________________________________

TRAINING¶

In [ ]:
batch_size = 32
epochs = 40
In [ ]:
history = model.fit(X_train, {'gender_out': y_gender_train, 'ethnicity_out': y_ethnicity_train, 'age_out': y_age_train},
                         batch_size=batch_size,
                         epochs = epochs, validation_data = (X_cv, [y_gender_cv, y_ethnicity_cv, y_age_cv]),
                         steps_per_epoch=(X_train.shape[0] // batch_size), verbose = 1)
Epoch 1/40
1527/1527 [==============================] - 35s 18ms/step - loss: 323.7038 - gender_out_loss: 0.7114 - ethnicity_out_loss: 1.3917 - age_out_loss: 321.6004 - gender_out_accuracy: 0.6239 - ethnicity_out_accuracy: 0.4480 - age_out_mae: 13.3786 - val_loss: 268.5219 - val_gender_out_loss: 0.5831 - val_ethnicity_out_loss: 1.3143 - val_age_out_loss: 266.6245 - val_gender_out_accuracy: 0.7273 - val_ethnicity_out_accuracy: 0.4919 - val_age_out_mae: 11.7743
Epoch 2/40
1527/1527 [==============================] - 21s 14ms/step - loss: 257.1779 - gender_out_loss: 0.5370 - ethnicity_out_loss: 1.2938 - age_out_loss: 255.3473 - gender_out_accuracy: 0.7393 - ethnicity_out_accuracy: 0.4914 - age_out_mae: 11.9146 - val_loss: 235.6608 - val_gender_out_loss: 0.5083 - val_ethnicity_out_loss: 1.2167 - val_age_out_loss: 233.9358 - val_gender_out_accuracy: 0.7575 - val_ethnicity_out_accuracy: 0.5437 - val_age_out_mae: 11.3228
Epoch 3/40
1527/1527 [==============================] - 20s 13ms/step - loss: 225.2323 - gender_out_loss: 0.4673 - ethnicity_out_loss: 1.2125 - age_out_loss: 223.5525 - gender_out_accuracy: 0.7842 - ethnicity_out_accuracy: 0.5338 - age_out_mae: 11.1633 - val_loss: 492.0294 - val_gender_out_loss: 0.4537 - val_ethnicity_out_loss: 1.2236 - val_age_out_loss: 490.3524 - val_gender_out_accuracy: 0.8144 - val_ethnicity_out_accuracy: 0.5211 - val_age_out_mae: 17.7536
Epoch 4/40
1527/1527 [==============================] - 20s 13ms/step - loss: 201.7944 - gender_out_loss: 0.4232 - ethnicity_out_loss: 1.1364 - age_out_loss: 200.2347 - gender_out_accuracy: 0.8117 - ethnicity_out_accuracy: 0.5796 - age_out_mae: 10.5502 - val_loss: 500.4611 - val_gender_out_loss: 0.4611 - val_ethnicity_out_loss: 1.1906 - val_age_out_loss: 498.8092 - val_gender_out_accuracy: 0.7877 - val_ethnicity_out_accuracy: 0.5749 - val_age_out_mae: 17.7691
Epoch 5/40
1527/1527 [==============================] - 20s 13ms/step - loss: 181.8077 - gender_out_loss: 0.3954 - ethnicity_out_loss: 1.0762 - age_out_loss: 180.3364 - gender_out_accuracy: 0.8286 - ethnicity_out_accuracy: 0.6045 - age_out_mae: 10.0618 - val_loss: 210.6388 - val_gender_out_loss: 0.4456 - val_ethnicity_out_loss: 1.1111 - val_age_out_loss: 209.0821 - val_gender_out_accuracy: 0.7926 - val_ethnicity_out_accuracy: 0.5908 - val_age_out_mae: 10.9174
Epoch 6/40
1527/1527 [==============================] - 20s 13ms/step - loss: 168.1642 - gender_out_loss: 0.3741 - ethnicity_out_loss: 1.0129 - age_out_loss: 166.7770 - gender_out_accuracy: 0.8401 - ethnicity_out_accuracy: 0.6309 - age_out_mae: 9.7349 - val_loss: 331.0715 - val_gender_out_loss: 0.4265 - val_ethnicity_out_loss: 1.0222 - val_age_out_loss: 329.6230 - val_gender_out_accuracy: 0.8402 - val_ethnicity_out_accuracy: 0.6308 - val_age_out_mae: 14.4089
Epoch 7/40
1527/1527 [==============================] - 20s 13ms/step - loss: 159.7575 - gender_out_loss: 0.3585 - ethnicity_out_loss: 0.9611 - age_out_loss: 158.4380 - gender_out_accuracy: 0.8463 - ethnicity_out_accuracy: 0.6567 - age_out_mae: 9.4866 - val_loss: 247.4861 - val_gender_out_loss: 0.3993 - val_ethnicity_out_loss: 1.1289 - val_age_out_loss: 245.9579 - val_gender_out_accuracy: 0.8351 - val_ethnicity_out_accuracy: 0.6031 - val_age_out_mae: 12.1246
Epoch 8/40
1527/1527 [==============================] - 20s 13ms/step - loss: 147.7237 - gender_out_loss: 0.3549 - ethnicity_out_loss: 0.9089 - age_out_loss: 146.4597 - gender_out_accuracy: 0.8498 - ethnicity_out_accuracy: 0.6780 - age_out_mae: 9.1512 - val_loss: 371.5719 - val_gender_out_loss: 0.3981 - val_ethnicity_out_loss: 0.9902 - val_age_out_loss: 370.1836 - val_gender_out_accuracy: 0.8213 - val_ethnicity_out_accuracy: 0.6586 - val_age_out_mae: 15.7794
Epoch 9/40
1527/1527 [==============================] - 20s 13ms/step - loss: 141.5716 - gender_out_loss: 0.3408 - ethnicity_out_loss: 0.8684 - age_out_loss: 140.3624 - gender_out_accuracy: 0.8565 - ethnicity_out_accuracy: 0.6895 - age_out_mae: 8.9514 - val_loss: 599.0993 - val_gender_out_loss: 0.3684 - val_ethnicity_out_loss: 0.9594 - val_age_out_loss: 597.7714 - val_gender_out_accuracy: 0.8513 - val_ethnicity_out_accuracy: 0.6571 - val_age_out_mae: 20.6419
Epoch 10/40
1527/1527 [==============================] - 20s 13ms/step - loss: 135.6490 - gender_out_loss: 0.3249 - ethnicity_out_loss: 0.8152 - age_out_loss: 134.5090 - gender_out_accuracy: 0.8627 - ethnicity_out_accuracy: 0.7099 - age_out_mae: 8.8151 - val_loss: 424.0415 - val_gender_out_loss: 0.3345 - val_ethnicity_out_loss: 1.1484 - val_age_out_loss: 422.5585 - val_gender_out_accuracy: 0.8657 - val_ethnicity_out_accuracy: 0.5849 - val_age_out_mae: 16.3562
Epoch 11/40
1527/1527 [==============================] - 20s 13ms/step - loss: 133.0142 - gender_out_loss: 0.3066 - ethnicity_out_loss: 0.7661 - age_out_loss: 131.9414 - gender_out_accuracy: 0.8727 - ethnicity_out_accuracy: 0.7350 - age_out_mae: 8.7116 - val_loss: 387.0347 - val_gender_out_loss: 0.3715 - val_ethnicity_out_loss: 1.0186 - val_age_out_loss: 385.6443 - val_gender_out_accuracy: 0.8576 - val_ethnicity_out_accuracy: 0.6352 - val_age_out_mae: 16.1233
Epoch 12/40
1527/1527 [==============================] - 20s 13ms/step - loss: 123.4598 - gender_out_loss: 0.2946 - ethnicity_out_loss: 0.7210 - age_out_loss: 122.4440 - gender_out_accuracy: 0.8801 - ethnicity_out_accuracy: 0.7432 - age_out_mae: 8.3716 - val_loss: 316.8261 - val_gender_out_loss: 0.3649 - val_ethnicity_out_loss: 1.0352 - val_age_out_loss: 315.4262 - val_gender_out_accuracy: 0.8549 - val_ethnicity_out_accuracy: 0.6635 - val_age_out_mae: 14.2583
Epoch 13/40
1527/1527 [==============================] - 20s 13ms/step - loss: 121.9688 - gender_out_loss: 0.2827 - ethnicity_out_loss: 0.6820 - age_out_loss: 121.0040 - gender_out_accuracy: 0.8851 - ethnicity_out_accuracy: 0.7583 - age_out_mae: 8.3296 - val_loss: 163.6945 - val_gender_out_loss: 0.3692 - val_ethnicity_out_loss: 1.0457 - val_age_out_loss: 162.2798 - val_gender_out_accuracy: 0.8503 - val_ethnicity_out_accuracy: 0.6333 - val_age_out_mae: 9.7773
Epoch 14/40
1527/1527 [==============================] - 20s 13ms/step - loss: 119.2087 - gender_out_loss: 0.2719 - ethnicity_out_loss: 0.6285 - age_out_loss: 118.3083 - gender_out_accuracy: 0.8919 - ethnicity_out_accuracy: 0.7791 - age_out_mae: 8.2160 - val_loss: 332.1840 - val_gender_out_loss: 0.3245 - val_ethnicity_out_loss: 1.0553 - val_age_out_loss: 330.8042 - val_gender_out_accuracy: 0.8775 - val_ethnicity_out_accuracy: 0.6821 - val_age_out_mae: 14.8229
Epoch 15/40
1527/1527 [==============================] - 20s 13ms/step - loss: 113.7213 - gender_out_loss: 0.2588 - ethnicity_out_loss: 0.5794 - age_out_loss: 112.8831 - gender_out_accuracy: 0.8991 - ethnicity_out_accuracy: 0.7973 - age_out_mae: 8.0789 - val_loss: 433.6667 - val_gender_out_loss: 0.3612 - val_ethnicity_out_loss: 1.3914 - val_age_out_loss: 431.9140 - val_gender_out_accuracy: 0.8721 - val_ethnicity_out_accuracy: 0.5950 - val_age_out_mae: 17.1476
Epoch 16/40
1527/1527 [==============================] - 20s 13ms/step - loss: 108.6970 - gender_out_loss: 0.2514 - ethnicity_out_loss: 0.5392 - age_out_loss: 107.9064 - gender_out_accuracy: 0.9040 - ethnicity_out_accuracy: 0.8108 - age_out_mae: 7.8685 - val_loss: 199.9440 - val_gender_out_loss: 0.3937 - val_ethnicity_out_loss: 1.2793 - val_age_out_loss: 198.2711 - val_gender_out_accuracy: 0.8427 - val_ethnicity_out_accuracy: 0.6225 - val_age_out_mae: 10.9450
Epoch 17/40
1527/1527 [==============================] - 20s 13ms/step - loss: 104.8166 - gender_out_loss: 0.2368 - ethnicity_out_loss: 0.4836 - age_out_loss: 104.0961 - gender_out_accuracy: 0.9049 - ethnicity_out_accuracy: 0.8284 - age_out_mae: 7.7497 - val_loss: 310.1180 - val_gender_out_loss: 0.3262 - val_ethnicity_out_loss: 1.1222 - val_age_out_loss: 308.6694 - val_gender_out_accuracy: 0.8768 - val_ethnicity_out_accuracy: 0.6568 - val_age_out_mae: 14.2557
Epoch 18/40
1527/1527 [==============================] - 20s 13ms/step - loss: 100.7375 - gender_out_loss: 0.2251 - ethnicity_out_loss: 0.4277 - age_out_loss: 100.0848 - gender_out_accuracy: 0.9150 - ethnicity_out_accuracy: 0.8492 - age_out_mae: 7.5854 - val_loss: 338.6359 - val_gender_out_loss: 0.3183 - val_ethnicity_out_loss: 1.2334 - val_age_out_loss: 337.0843 - val_gender_out_accuracy: 0.8787 - val_ethnicity_out_accuracy: 0.6726 - val_age_out_mae: 14.8708
Epoch 19/40
1527/1527 [==============================] - 20s 13ms/step - loss: 95.6302 - gender_out_loss: 0.2221 - ethnicity_out_loss: 0.3973 - age_out_loss: 95.0109 - gender_out_accuracy: 0.9163 - ethnicity_out_accuracy: 0.8582 - age_out_mae: 7.3707 - val_loss: 286.9449 - val_gender_out_loss: 0.3467 - val_ethnicity_out_loss: 1.2801 - val_age_out_loss: 285.3181 - val_gender_out_accuracy: 0.8751 - val_ethnicity_out_accuracy: 0.6456 - val_age_out_mae: 13.5530
Epoch 20/40
1527/1527 [==============================] - 20s 13ms/step - loss: 93.4689 - gender_out_loss: 0.2106 - ethnicity_out_loss: 0.3625 - age_out_loss: 92.8958 - gender_out_accuracy: 0.9202 - ethnicity_out_accuracy: 0.8741 - age_out_mae: 7.2999 - val_loss: 375.0273 - val_gender_out_loss: 0.3187 - val_ethnicity_out_loss: 1.3136 - val_age_out_loss: 373.3951 - val_gender_out_accuracy: 0.8738 - val_ethnicity_out_accuracy: 0.6532 - val_age_out_mae: 15.9810
Epoch 21/40
1527/1527 [==============================] - 20s 13ms/step - loss: 87.6478 - gender_out_loss: 0.1966 - ethnicity_out_loss: 0.3165 - age_out_loss: 87.1347 - gender_out_accuracy: 0.9268 - ethnicity_out_accuracy: 0.8888 - age_out_mae: 7.0985 - val_loss: 318.8818 - val_gender_out_loss: 0.3067 - val_ethnicity_out_loss: 1.4835 - val_age_out_loss: 317.0916 - val_gender_out_accuracy: 0.8778 - val_ethnicity_out_accuracy: 0.6507 - val_age_out_mae: 14.4675
Epoch 22/40
1527/1527 [==============================] - 21s 14ms/step - loss: 87.0476 - gender_out_loss: 0.1894 - ethnicity_out_loss: 0.2873 - age_out_loss: 86.5708 - gender_out_accuracy: 0.9292 - ethnicity_out_accuracy: 0.8986 - age_out_mae: 7.0440 - val_loss: 279.4834 - val_gender_out_loss: 0.3208 - val_ethnicity_out_loss: 1.5290 - val_age_out_loss: 277.6336 - val_gender_out_accuracy: 0.8721 - val_ethnicity_out_accuracy: 0.6674 - val_age_out_mae: 13.2183
Epoch 23/40
1527/1527 [==============================] - 20s 13ms/step - loss: 83.0396 - gender_out_loss: 0.1787 - ethnicity_out_loss: 0.2613 - age_out_loss: 82.5997 - gender_out_accuracy: 0.9328 - ethnicity_out_accuracy: 0.9079 - age_out_mae: 6.8516 - val_loss: 262.8104 - val_gender_out_loss: 0.3282 - val_ethnicity_out_loss: 1.7513 - val_age_out_loss: 260.7309 - val_gender_out_accuracy: 0.8756 - val_ethnicity_out_accuracy: 0.6514 - val_age_out_mae: 12.8182
Epoch 24/40
1527/1527 [==============================] - 20s 13ms/step - loss: 80.0259 - gender_out_loss: 0.1722 - ethnicity_out_loss: 0.2338 - age_out_loss: 79.6200 - gender_out_accuracy: 0.9355 - ethnicity_out_accuracy: 0.9197 - age_out_mae: 6.7393 - val_loss: 317.9184 - val_gender_out_loss: 0.3225 - val_ethnicity_out_loss: 1.7833 - val_age_out_loss: 315.8126 - val_gender_out_accuracy: 0.8760 - val_ethnicity_out_accuracy: 0.6524 - val_age_out_mae: 14.2157
Epoch 25/40
1527/1527 [==============================] - 20s 13ms/step - loss: 75.2665 - gender_out_loss: 0.1654 - ethnicity_out_loss: 0.2149 - age_out_loss: 74.8861 - gender_out_accuracy: 0.9402 - ethnicity_out_accuracy: 0.9246 - age_out_mae: 6.5474 - val_loss: 325.9619 - val_gender_out_loss: 0.3207 - val_ethnicity_out_loss: 1.7185 - val_age_out_loss: 323.9229 - val_gender_out_accuracy: 0.8844 - val_ethnicity_out_accuracy: 0.6713 - val_age_out_mae: 14.5008
Epoch 26/40
1527/1527 [==============================] - 20s 13ms/step - loss: 70.3757 - gender_out_loss: 0.1543 - ethnicity_out_loss: 0.1865 - age_out_loss: 70.0350 - gender_out_accuracy: 0.9424 - ethnicity_out_accuracy: 0.9372 - age_out_mae: 6.3671 - val_loss: 255.6800 - val_gender_out_loss: 0.3584 - val_ethnicity_out_loss: 1.7188 - val_age_out_loss: 253.6028 - val_gender_out_accuracy: 0.8787 - val_ethnicity_out_accuracy: 0.6674 - val_age_out_mae: 12.5169
Epoch 27/40
1527/1527 [==============================] - 20s 13ms/step - loss: 68.6161 - gender_out_loss: 0.1456 - ethnicity_out_loss: 0.1806 - age_out_loss: 68.2900 - gender_out_accuracy: 0.9458 - ethnicity_out_accuracy: 0.9406 - age_out_mae: 6.2787 - val_loss: 240.7778 - val_gender_out_loss: 0.3211 - val_ethnicity_out_loss: 1.8120 - val_age_out_loss: 238.6446 - val_gender_out_accuracy: 0.8773 - val_ethnicity_out_accuracy: 0.6350 - val_age_out_mae: 11.9223
Epoch 28/40
1527/1527 [==============================] - 20s 13ms/step - loss: 66.0790 - gender_out_loss: 0.1325 - ethnicity_out_loss: 0.1494 - age_out_loss: 65.7972 - gender_out_accuracy: 0.9526 - ethnicity_out_accuracy: 0.9506 - age_out_mae: 6.1543 - val_loss: 361.7072 - val_gender_out_loss: 0.3244 - val_ethnicity_out_loss: 1.7157 - val_age_out_loss: 359.6671 - val_gender_out_accuracy: 0.8834 - val_ethnicity_out_accuracy: 0.6586 - val_age_out_mae: 15.6452
Epoch 29/40
1527/1527 [==============================] - 20s 13ms/step - loss: 64.7977 - gender_out_loss: 0.1266 - ethnicity_out_loss: 0.1503 - age_out_loss: 64.5208 - gender_out_accuracy: 0.9537 - ethnicity_out_accuracy: 0.9518 - age_out_mae: 6.0555 - val_loss: 204.0249 - val_gender_out_loss: 0.4421 - val_ethnicity_out_loss: 1.9609 - val_age_out_loss: 201.6221 - val_gender_out_accuracy: 0.8719 - val_ethnicity_out_accuracy: 0.6586 - val_age_out_mae: 10.9431
Epoch 30/40
1527/1527 [==============================] - 20s 13ms/step - loss: 62.0085 - gender_out_loss: 0.1243 - ethnicity_out_loss: 0.1565 - age_out_loss: 61.7278 - gender_out_accuracy: 0.9559 - ethnicity_out_accuracy: 0.9487 - age_out_mae: 5.9626 - val_loss: 389.9092 - val_gender_out_loss: 0.3398 - val_ethnicity_out_loss: 1.9412 - val_age_out_loss: 387.6282 - val_gender_out_accuracy: 0.8731 - val_ethnicity_out_accuracy: 0.6568 - val_age_out_mae: 16.0852
Epoch 31/40
1527/1527 [==============================] - 20s 13ms/step - loss: 59.2326 - gender_out_loss: 0.1127 - ethnicity_out_loss: 0.1403 - age_out_loss: 58.9796 - gender_out_accuracy: 0.9577 - ethnicity_out_accuracy: 0.9553 - age_out_mae: 5.8334 - val_loss: 217.3285 - val_gender_out_loss: 0.3844 - val_ethnicity_out_loss: 1.8499 - val_age_out_loss: 215.0941 - val_gender_out_accuracy: 0.8760 - val_ethnicity_out_accuracy: 0.6701 - val_age_out_mae: 11.4279
Epoch 32/40
1527/1527 [==============================] - 20s 13ms/step - loss: 57.4612 - gender_out_loss: 0.1054 - ethnicity_out_loss: 0.1223 - age_out_loss: 57.2337 - gender_out_accuracy: 0.9622 - ethnicity_out_accuracy: 0.9595 - age_out_mae: 5.7084 - val_loss: 406.9776 - val_gender_out_loss: 0.3925 - val_ethnicity_out_loss: 2.2572 - val_age_out_loss: 404.3281 - val_gender_out_accuracy: 0.8824 - val_ethnicity_out_accuracy: 0.6593 - val_age_out_mae: 16.3906
Epoch 33/40
1527/1527 [==============================] - 20s 13ms/step - loss: 53.8035 - gender_out_loss: 0.1031 - ethnicity_out_loss: 0.1316 - age_out_loss: 53.5689 - gender_out_accuracy: 0.9631 - ethnicity_out_accuracy: 0.9577 - age_out_mae: 5.5832 - val_loss: 351.0071 - val_gender_out_loss: 0.3834 - val_ethnicity_out_loss: 1.7477 - val_age_out_loss: 348.8760 - val_gender_out_accuracy: 0.8746 - val_ethnicity_out_accuracy: 0.6546 - val_age_out_mae: 14.8350
Epoch 34/40
1527/1527 [==============================] - 20s 13ms/step - loss: 53.5682 - gender_out_loss: 0.0951 - ethnicity_out_loss: 0.1127 - age_out_loss: 53.3605 - gender_out_accuracy: 0.9654 - ethnicity_out_accuracy: 0.9636 - age_out_mae: 5.5168 - val_loss: 372.7451 - val_gender_out_loss: 0.4826 - val_ethnicity_out_loss: 1.8787 - val_age_out_loss: 370.3838 - val_gender_out_accuracy: 0.8743 - val_ethnicity_out_accuracy: 0.6684 - val_age_out_mae: 15.6482
Epoch 35/40
1527/1527 [==============================] - 20s 13ms/step - loss: 50.3101 - gender_out_loss: 0.0973 - ethnicity_out_loss: 0.1165 - age_out_loss: 50.0963 - gender_out_accuracy: 0.9655 - ethnicity_out_accuracy: 0.9616 - age_out_mae: 5.3372 - val_loss: 334.6974 - val_gender_out_loss: 0.3684 - val_ethnicity_out_loss: 2.0923 - val_age_out_loss: 332.2367 - val_gender_out_accuracy: 0.8849 - val_ethnicity_out_accuracy: 0.6740 - val_age_out_mae: 14.6931
Epoch 36/40
1527/1527 [==============================] - 20s 13ms/step - loss: 48.9664 - gender_out_loss: 0.0842 - ethnicity_out_loss: 0.1054 - age_out_loss: 48.7768 - gender_out_accuracy: 0.9687 - ethnicity_out_accuracy: 0.9659 - age_out_mae: 5.2732 - val_loss: 325.1982 - val_gender_out_loss: 0.3897 - val_ethnicity_out_loss: 2.0723 - val_age_out_loss: 322.7362 - val_gender_out_accuracy: 0.8819 - val_ethnicity_out_accuracy: 0.6578 - val_age_out_mae: 14.3628
Epoch 37/40
1527/1527 [==============================] - 20s 13ms/step - loss: 46.8933 - gender_out_loss: 0.0804 - ethnicity_out_loss: 0.1019 - age_out_loss: 46.7112 - gender_out_accuracy: 0.9711 - ethnicity_out_accuracy: 0.9679 - age_out_mae: 5.1776 - val_loss: 217.5118 - val_gender_out_loss: 0.4918 - val_ethnicity_out_loss: 2.2258 - val_age_out_loss: 214.7942 - val_gender_out_accuracy: 0.8714 - val_ethnicity_out_accuracy: 0.6659 - val_age_out_mae: 11.1260
Epoch 38/40
1527/1527 [==============================] - 20s 13ms/step - loss: 45.2936 - gender_out_loss: 0.0726 - ethnicity_out_loss: 0.0958 - age_out_loss: 45.1252 - gender_out_accuracy: 0.9735 - ethnicity_out_accuracy: 0.9692 - age_out_mae: 5.0807 - val_loss: 278.0116 - val_gender_out_loss: 0.4145 - val_ethnicity_out_loss: 2.0238 - val_age_out_loss: 275.5735 - val_gender_out_accuracy: 0.8756 - val_ethnicity_out_accuracy: 0.6424 - val_age_out_mae: 13.1162
Epoch 39/40
1527/1527 [==============================] - 20s 13ms/step - loss: 43.4894 - gender_out_loss: 0.0712 - ethnicity_out_loss: 0.0966 - age_out_loss: 43.3217 - gender_out_accuracy: 0.9730 - ethnicity_out_accuracy: 0.9695 - age_out_mae: 5.0177 - val_loss: 285.9565 - val_gender_out_loss: 0.4234 - val_ethnicity_out_loss: 2.1057 - val_age_out_loss: 283.4275 - val_gender_out_accuracy: 0.8807 - val_ethnicity_out_accuracy: 0.6630 - val_age_out_mae: 13.2552
Epoch 40/40
1527/1527 [==============================] - 20s 13ms/step - loss: 42.3439 - gender_out_loss: 0.0722 - ethnicity_out_loss: 0.0955 - age_out_loss: 42.1762 - gender_out_accuracy: 0.9730 - ethnicity_out_accuracy: 0.9686 - age_out_mae: 4.8998 - val_loss: 231.5461 - val_gender_out_loss: 0.5709 - val_ethnicity_out_loss: 2.8153 - val_age_out_loss: 228.1599 - val_gender_out_accuracy: 0.8731 - val_ethnicity_out_accuracy: 0.6514 - val_age_out_mae: 11.5862
In [ ]:
model.save('VGG_train.h5')

RETRAIN - 1¶

In [ ]:
model1 = load_model("VGG_train.h5")
In [ ]:
##Defining batch size and callbacks
batch_size = 17
epochs = 5
In [ ]:
# #Training model
# np.random.seed(123)
# tf.random.set_seed(123)
history = model1.fit(X_train, {'gender_out': y_gender_train, 'ethnicity_out': y_ethnicity_train, 'age_out': y_age_train},
                         batch_size=batch_size,validation_data = (X_cv, [y_gender_cv, y_ethnicity_cv, y_age_cv]),
                         verbose = 0,
                         steps_per_epoch=(X_train.shape[0] // batch_size)
                         )
Epoch 1/5
718/718 [==============================] - 19s 19ms/step - loss: 116.7863 - gender_out_loss: 0.3715 - ethnicity_out_loss: 0.7499 - age_out_loss: 115.6649 - gender_out_accuracy: 0.8390 - ethnicity_out_accuracy: 0.7459 - age_out_mae: 7.9973 - val_loss: 69.6771 - val_gender_out_loss: 0.2493 - val_ethnicity_out_loss: 0.7270 - val_age_out_loss: 68.7008 - val_gender_out_accuracy: 0.9060 - val_ethnicity_out_accuracy: 0.7523 - val_age_out_mae: 6.2634
Epoch 2/5
718/718 [==============================] - 14s 18ms/step - loss: 67.5913 - gender_out_loss: 0.2422 - ethnicity_out_loss: 0.4273 - age_out_loss: 66.9217 - gender_out_accuracy: 0.9059 - ethnicity_out_accuracy: 0.8609 - age_out_mae: 6.1658 - val_loss: 75.4202 - val_gender_out_loss: 0.2420 - val_ethnicity_out_loss: 0.5529 - val_age_out_loss: 74.6252 - val_gender_out_accuracy: 0.9097 - val_ethnicity_out_accuracy: 0.8191 - val_age_out_mae: 6.3696
Epoch 3/5
718/718 [==============================] - 13s 18ms/step - loss: 55.9042 - gender_out_loss: 0.2032 - ethnicity_out_loss: 0.2669 - age_out_loss: 55.4341 - gender_out_accuracy: 0.9265 - ethnicity_out_accuracy: 0.9106 - age_out_mae: 5.6373 - val_loss: 61.8589 - val_gender_out_loss: 0.2384 - val_ethnicity_out_loss: 0.7203 - val_age_out_loss: 60.9002 - val_gender_out_accuracy: 0.9075 - val_ethnicity_out_accuracy: 0.7855 - val_age_out_mae: 5.7681
Epoch 4/5
718/718 [==============================] - 13s 18ms/step - loss: 51.1276 - gender_out_loss: 0.1756 - ethnicity_out_loss: 0.1867 - age_out_loss: 50.7652 - gender_out_accuracy: 0.9344 - ethnicity_out_accuracy: 0.9382 - age_out_mae: 5.3846 - val_loss: 81.0012 - val_gender_out_loss: 0.2529 - val_ethnicity_out_loss: 0.7463 - val_age_out_loss: 80.0021 - val_gender_out_accuracy: 0.9092 - val_ethnicity_out_accuracy: 0.8073 - val_age_out_mae: 6.6062
Epoch 5/5
718/718 [==============================] - 13s 18ms/step - loss: 46.4928 - gender_out_loss: 0.1497 - ethnicity_out_loss: 0.1402 - age_out_loss: 46.2030 - gender_out_accuracy: 0.9470 - ethnicity_out_accuracy: 0.9519 - age_out_mae: 5.1450 - val_loss: 62.6454 - val_gender_out_loss: 0.2325 - val_ethnicity_out_loss: 0.7226 - val_age_out_loss: 61.6902 - val_gender_out_accuracy: 0.9148 - val_ethnicity_out_accuracy: 0.8063 - val_age_out_mae: 5.7772
In [ ]:
model1.save('VGG_retrain.h5')

RETRAIN - 2¶

In [ ]:
model2 = load_model("VGG_retrain.h5")
In [ ]:
##Defining batch size and callbacks
batch_size = 32
epochs = 3
In [ ]:
#Training model
history = model2.fit(X_train, {'gender_out': y_gender_train, 'ethnicity_out': y_ethnicity_train, 'age_out': y_age_train},
                         batch_size=batch_size,
                         epochs = epochs, validation_data = (X_cv, [y_gender_cv, y_ethnicity_cv, y_age_cv]),
                         steps_per_epoch=(X_train.shape[0] // batch_size))
Epoch 1/3
381/381 [==============================] - 23s 43ms/step - loss: 130.1086 - gender_out_loss: 0.1440 - ethnicity_out_loss: 0.3920 - age_out_loss: 57.1314 - gender_out_accuracy: 0.9546 - ethnicity_out_accuracy: 0.8883 - age_out_mae: 5.4963 - val_loss: 118.3470 - val_gender_out_loss: 0.1369 - val_ethnicity_out_loss: 0.3767 - val_age_out_loss: 47.5062 - val_gender_out_accuracy: 0.9526 - val_ethnicity_out_accuracy: 0.8910 - val_age_out_mae: 4.9060
Epoch 2/3
381/381 [==============================] - 16s 42ms/step - loss: 122.2475 - gender_out_loss: 0.1034 - ethnicity_out_loss: 0.2327 - age_out_loss: 51.5277 - gender_out_accuracy: 0.9696 - ethnicity_out_accuracy: 0.9278 - age_out_mae: 5.2441 - val_loss: 113.2697 - val_gender_out_loss: 0.1748 - val_ethnicity_out_loss: 0.3705 - val_age_out_loss: 44.2503 - val_gender_out_accuracy: 0.9450 - val_ethnicity_out_accuracy: 0.8957 - val_age_out_mae: 4.8189
Epoch 3/3
381/381 [==============================] - 16s 41ms/step - loss: 117.0655 - gender_out_loss: 0.0771 - ethnicity_out_loss: 0.1286 - age_out_loss: 47.7962 - gender_out_accuracy: 0.9774 - ethnicity_out_accuracy: 0.9609 - age_out_mae: 5.1136 - val_loss: 115.0118 - val_gender_out_loss: 0.1642 - val_ethnicity_out_loss: 0.5275 - val_age_out_loss: 46.6406 - val_gender_out_accuracy: 0.9438 - val_ethnicity_out_accuracy: 0.8736 - val_age_out_mae: 4.9571
In [ ]:
model2.save("VGG_retrain.h5")
In [ ]:
model3 = load_model('VGG16_final.h5')

VGG16 FINAL Model¶

In [ ]:
#FINAL MODEL
model3.summary()
Model: "model_3"
__________________________________________________________________________________________________
 Layer (type)                   Output Shape         Param #     Connected to                     
==================================================================================================
 input_4 (InputLayer)           [(None, 50, 50, 3)]  0           []                               
                                                                                                  
 conv2d_15 (Conv2D)             (None, 50, 50, 160)  4480        ['input_4[0][0]']                
                                                                                                  
 batch_normalization_15 (BatchN  (None, 50, 50, 160)  640        ['conv2d_15[0][0]']              
 ormalization)                                                                                    
                                                                                                  
 leaky_re_lu_15 (LeakyReLU)     (None, 50, 50, 160)  0           ['batch_normalization_15[0][0]'] 
                                                                                                  
 max_pooling2d_9 (MaxPooling2D)  (None, 25, 25, 160)  0          ['leaky_re_lu_15[0][0]']         
                                                                                                  
 conv2d_16 (Conv2D)             (None, 25, 25, 192)  276672      ['max_pooling2d_9[0][0]']        
                                                                                                  
 batch_normalization_16 (BatchN  (None, 25, 25, 192)  768        ['conv2d_16[0][0]']              
 ormalization)                                                                                    
                                                                                                  
 leaky_re_lu_16 (LeakyReLU)     (None, 25, 25, 192)  0           ['batch_normalization_16[0][0]'] 
                                                                                                  
 average_pooling2d_6 (AveragePo  (None, 12, 12, 192)  0          ['leaky_re_lu_16[0][0]']         
 oling2D)                                                                                         
                                                                                                  
 conv2d_17 (Conv2D)             (None, 12, 12, 224)  387296      ['average_pooling2d_6[0][0]']    
                                                                                                  
 batch_normalization_17 (BatchN  (None, 12, 12, 224)  896        ['conv2d_17[0][0]']              
 ormalization)                                                                                    
                                                                                                  
 leaky_re_lu_17 (LeakyReLU)     (None, 12, 12, 224)  0           ['batch_normalization_17[0][0]'] 
                                                                                                  
 average_pooling2d_7 (AveragePo  (None, 6, 6, 224)   0           ['leaky_re_lu_17[0][0]']         
 oling2D)                                                                                         
                                                                                                  
 conv2d_18 (Conv2D)             (None, 6, 6, 224)    451808      ['average_pooling2d_7[0][0]']    
                                                                                                  
 batch_normalization_18 (BatchN  (None, 6, 6, 224)   896         ['conv2d_18[0][0]']              
 ormalization)                                                                                    
                                                                                                  
 leaky_re_lu_18 (LeakyReLU)     (None, 6, 6, 224)    0           ['batch_normalization_18[0][0]'] 
                                                                                                  
 max_pooling2d_10 (MaxPooling2D  (None, 3, 3, 224)   0           ['leaky_re_lu_18[0][0]']         
 )                                                                                                
                                                                                                  
 conv2d_19 (Conv2D)             (None, 3, 3, 224)    451808      ['max_pooling2d_10[0][0]']       
                                                                                                  
 batch_normalization_19 (BatchN  (None, 3, 3, 224)   896         ['conv2d_19[0][0]']              
 ormalization)                                                                                    
                                                                                                  
 leaky_re_lu_19 (LeakyReLU)     (None, 3, 3, 224)    0           ['batch_normalization_19[0][0]'] 
                                                                                                  
 flatten_11 (Flatten)           (None, 8064)         0           ['average_pooling2d_7[0][0]']    
                                                                                                  
 max_pooling2d_11 (MaxPooling2D  (None, 1, 1, 224)   0           ['leaky_re_lu_19[0][0]']         
 )                                                                                                
                                                                                                  
 dense_22 (Dense)               (None, 3096)         24969240    ['flatten_11[0][0]']             
                                                                                                  
 flatten_10 (Flatten)           (None, 2016)         0           ['max_pooling2d_10[0][0]']       
                                                                                                  
 flatten_9 (Flatten)            (None, 224)          0           ['max_pooling2d_11[0][0]']       
                                                                                                  
 dropout_8 (Dropout)            (None, 3096)         0           ['dense_22[0][0]']               
                                                                                                  
 dense_20 (Dense)               (None, 1000)         2017000     ['flatten_10[0][0]']             
                                                                                                  
 dense_18 (Dense)               (None, 1000)         225000      ['flatten_9[0][0]']              
                                                                                                  
 dense_23 (Dense)               (None, 3096)         9588312     ['dropout_8[0][0]']              
                                                                                                  
 dense_21 (Dense)               (None, 1000)         1001000     ['dense_20[0][0]']               
                                                                                                  
 dense_19 (Dense)               (None, 1000)         1001000     ['dense_18[0][0]']               
                                                                                                  
 dropout_9 (Dropout)            (None, 3096)         0           ['dense_23[0][0]']               
                                                                                                  
 gender_out (Dense)             (None, 1)            1001        ['dense_21[0][0]']               
                                                                                                  
 ethnicity_out (Dense)          (None, 5)            5005        ['dense_19[0][0]']               
                                                                                                  
 age_out (Dense)                (None, 1)            3097        ['dropout_9[0][0]']              
                                                                                                  
==================================================================================================
Total params: 40,386,815
Trainable params: 40,384,767
Non-trainable params: 2,048
__________________________________________________________________________________________________

INFERENCE - OCCLUDED IMAGES¶

In [ ]:
pred = model3.predict(X_test)

#Accuracy in gender prediction
#Accuracy in ethnicity prediction
#Age mae in age prediction
test_loss,test_gender_loss, test_ethnicity_loss, test_age_loss, test_gender_acc,test_ethnicity_acc,test_age_mae = model3.evaluate(X_test, 
                                                                                      [y_gender_test, y_ethnicity_test, y_age_test], verbose=0)
print(f'\nTest gender accuracy: {test_gender_acc}')
print(f'\nTest ethnicity accuracy: {test_ethnicity_acc}')
print(f'\nTest age MAPE: {test_age_mae}')
128/128 [==============================] - 1s 7ms/step

Test gender accuracy: 0.9509081840515137

Test ethnicity accuracy: 0.8733431696891785

Test age MAPE: 6.598032474517822

ACTIVATION MAP GENERATION¶

In [ ]:
gender_weights = model3.layers[-3].get_weights()[0]
ethnicity_weights = model3.layers[-2].get_weights()[0]
age_weights = model3.layers[-1].get_weights()[0]

WE USE THE LAST CONVOLUTIONAL LAYER AND THE RESPECTIVE OUTPUT LAYERS OF THE MODEL FOR EACH ATTRIBUTE TO GENERATE AN ACTIVATION MAP

In [ ]:
gender_model  = Model(inputs=model3.input,outputs=(model3.layers[-18].output,model3.layers[-3].output))
ethnicity_model = Model(inputs=model3.input,outputs=(model3.layers[-18].output,model3.layers[-2].output))
age_model = Model(inputs=model3.input,outputs=(model3.layers[-18].output,model3.layers[-1].output))
In [ ]:
features_gender, results_gender = gender_model.predict(X_test)
features_ethnicity, results_ethnicity = ethnicity_model.predict(X_test)
features_age, results_age = age_model.predict(X_test)
128/128 [==============================] - 1s 6ms/step
128/128 [==============================] - 1s 5ms/step
128/128 [==============================] - 1s 6ms/step
In [ ]:
#CLASS ACTIVATION MAP
def activation_map(features, weights, results,x,y,z,att):
  for idx in range(x,y):

        features_for_one_img = features[idx,:,:,:]
        height_roomout = X_train.shape[1]/features_for_one_img.shape[0]
        width_roomout  = X_train.shape[2]/features_for_one_img.shape[1]
    
        cam_features = sp.ndimage.zoom(features_for_one_img, (height_roomout, width_roomout, z),order = 1)
        pred = np.argmax(results[idx])
        plt.figure(facecolor='white')
        cam_weights = weights[:,pred]
        cam_output  = np.dot(cam_features,cam_weights)
        fig, axs = plt.subplots(1, 2, figsize=(4, 4))
        plt.grid(False)
        plt.xticks([])
        plt.yticks([])
        if att=='eth':
          buf = 'Predicted Class = ' +str(y_test[idx][1])
        elif att=='gen':
          buf = 'Predicted Class = ' +str(y_test[idx][0]) 
        if att=='age':
          buf = 'Predicted Class = ' +str(y_test[idx][2])         
        # plot original image
        plt.xlabel(buf)
        axs[0].imshow(X_test[idx], alpha=0.5)
        axs[0].set_xlabel("Original Image")
        axs[0].grid(False)
        axs[0].set_axis_off()

        # plot activation map
        axs[1].imshow(np.squeeze(X_test[idx]), alpha=0.7)
        axs[1].imshow(cam_output, cmap='jet', alpha=0.5)
        axs[1].set_title("Class Activation Map")
        axs[1].grid(False)
        axs[0].set_axis_off()
        axs[1].tick_params(axis='both', which='both', length=0)  # Remove ticks

        plt.show()

        axs[0].tick_params(axis='both', which='both', length=0)  # Remove ticks

        # plot activation map
        axs[1].imshow(np.squeeze(X_test[idx]), alpha=0.7)
        axs[1].imshow(cam_output, cmap='jet', alpha=0.5)
        axs[1].set_title("Class Activation Map")
        axs[1].grid(False)
        axs[0].set_axis_off()
        axs[1].tick_params(axis='both', which='both', length=0)  # Remove ticks

        plt.show()
In [ ]:
#activation map to depict how the CNN learns to detect ethnicity
activation_map(features_ethnicity, ethnicity_weights, results_ethnicity,82,93,4.463,'eth')
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
In [ ]:
#activation map to depict how the CNN learns to detect gender
activation_map(features_gender, gender_weights, results_gender,17,32,4.463,'gen')
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
In [ ]:
#activation map to depict how the CNN learns to detect age
activation_map(features_age, age_weights, results_age,45,56,13.82,'age')
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>

MODEL ARCHITECTURE¶

In [ ]:
tf.keras.utils.plot_model(
    model2, to_file='model.png', show_shapes=True, show_layer_names=True,
    rankdir='TB', expand_nested=True, dpi=96
)
Out[ ]:

Predictions on test data¶

In [ ]:
#SHOWING THE RESULTS OF A SUBSET OF TEST DATA
for i in range(4, 55):
    plt.figure(figsize=(2, 2))
    gt_age = y_test[i][2]
    gt_ethnicity = y_test[i][1]
    gt_gender = y_test[i][0]
    print("GROUND TRUTH:")
    print("Gender:", gt_gender)
    print("Ethnicity:", gt_ethnicity)
    print("Age:", gt_age)
    plt.imshow(X_test[i], interpolation='nearest')
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.show()   
    pred_gender, pred_ethnicity, pred_age = model3.predict(X_test[i][np.newaxis])
    gender_acc = np.round(pred_gender)
    ethnicity_acc = np.mean(np.argmax(pred_ethnicity))
    age_mae = np.abs(pred_age)
    print("\nPREDICTED:")
    print("Gender:", gender_acc)
    print("Ethnicity:", ethnicity_acc)
    print("Age:", age_mae)    
    print("\n------------------------------------------------\n")
    
GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 29
1/1 [==============================] - 0s 19ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[37.797348]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 3
Age: 42
1/1 [==============================] - 0s 19ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 3.0
Age: [[43.68522]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 1
Age: 32
1/1 [==============================] - 0s 20ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 1.0
Age: [[32.65385]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 2
Age: 58
1/1 [==============================] - 0s 21ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 2.0
Age: [[68.22838]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 50
1/1 [==============================] - 0s 23ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[48.383724]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 1
Age: 55
1/1 [==============================] - 0s 20ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 1.0
Age: [[56.699192]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 3
Age: 32
1/1 [==============================] - 0s 21ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 3.0
Age: [[42.690952]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 2
Age: 24
1/1 [==============================] - 0s 19ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 2.0
Age: [[25.719501]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 1
Age: 26
1/1 [==============================] - 0s 19ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 1.0
Age: [[27.031202]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 2
Age: 29
1/1 [==============================] - 0s 19ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 2.0
Age: [[23.862457]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 32
1/1 [==============================] - 0s 28ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[35.327084]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 1
Age: 89
1/1 [==============================] - 0s 26ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[86.37816]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 80
1/1 [==============================] - 0s 27ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[65.74268]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 35
1/1 [==============================] - 0s 27ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[36.186134]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 42
1/1 [==============================] - 0s 27ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[52.27335]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 26
1/1 [==============================] - 0s 29ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[53.71712]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 2
Age: 45
1/1 [==============================] - 0s 22ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 1.0
Age: [[39.852734]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 82
1/1 [==============================] - 0s 26ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[93.1253]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 20
1/1 [==============================] - 0s 23ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[17.524855]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 1
Age: 26
1/1 [==============================] - 0s 24ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 1.0
Age: [[31.402405]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 1
Age: 23
1/1 [==============================] - 0s 26ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 1.0
Age: [[26.081339]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 25
1/1 [==============================] - 0s 28ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 1.0
Age: [[28.970743]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 4
Age: 6
1/1 [==============================] - 0s 19ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 4.0
Age: [[8.701446]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 3
Age: 28
1/1 [==============================] - 0s 28ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 2.0
Age: [[40.775574]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 1
Age: 27
1/1 [==============================] - 0s 26ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 1.0
Age: [[25.722157]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 24
1/1 [==============================] - 0s 28ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[23.73679]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 1
Age: 25
1/1 [==============================] - 0s 25ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 1.0
Age: [[32.485195]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 53
1/1 [==============================] - 0s 26ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[49.65606]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 70
1/1 [==============================] - 0s 24ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[62.50388]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 22
1/1 [==============================] - 0s 25ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[26.66797]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 60
1/1 [==============================] - 0s 24ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[61.563843]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 2
Age: 32
1/1 [==============================] - 0s 29ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 2.0
Age: [[50.3477]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 3
Age: 26
1/1 [==============================] - 0s 18ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[27.067326]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 1
Age: 35
1/1 [==============================] - 0s 25ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 1.0
Age: [[48.729076]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 32
1/1 [==============================] - 0s 25ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[43.000263]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 85
1/1 [==============================] - 0s 25ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[88.34044]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 3
Age: 50
1/1 [==============================] - 0s 19ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 3.0
Age: [[53.03958]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 2
Age: 27
1/1 [==============================] - 0s 26ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 3.0
Age: [[30.370893]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 3
Age: 62
1/1 [==============================] - 0s 20ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 3.0
Age: [[58.244354]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 22
1/1 [==============================] - 0s 25ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[31.076157]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 48
1/1 [==============================] - 0s 23ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[60.598545]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 1
Age: 45
1/1 [==============================] - 0s 19ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 1.0
Age: [[45.305172]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 2
Age: 29
1/1 [==============================] - 0s 34ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[26.689833]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 47
1/1 [==============================] - 0s 27ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[58.22728]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 27
1/1 [==============================] - 0s 28ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[28.322348]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 2
Age: 31
1/1 [==============================] - 0s 28ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 2.0
Age: [[37.763313]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 2
Age: 23
1/1 [==============================] - 0s 26ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 2.0
Age: [[24.672117]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 3
Age: 39
1/1 [==============================] - 0s 26ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 3.0
Age: [[43.42892]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 72
1/1 [==============================] - 0s 30ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[78.1159]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 26
1/1 [==============================] - 0s 31ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[26.77066]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 2
Age: 16
1/1 [==============================] - 0s 26ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 2.0
Age: [[24.999945]]

------------------------------------------------

VIZUALIZATION AND EVALUATION(CONFUSION MATRIX, PRECISON, RECALL AND F1 SCORE)¶

In [ ]:
from sklearn.metrics import classification_report, confusion_matrix
In [ ]:
pred = model3.predict(X_test)
128/128 [==============================] - 1s 7ms/step

GENDER - CONFUSION MATRIX¶

In [ ]:
#CONFUSION MATRIX - GENDER
Y_pred_gender = np.round(pred[0], 2)
Y_true_gender = y_gender_test
print('Confusion Matrix')
cm = confusion_matrix(Y_true_gender,np.round(pred[0]))
sns.heatmap(cm, annot=True, fmt='d', cbar=False, cmap='Greens');
target_names = ['Male', 'Female']
print(classification_report(Y_true_gender, np.round(pred[0]), target_names=target_names))
Confusion Matrix
              precision    recall  f1-score   support

        Male       0.96      0.95      0.95      2100
      Female       0.95      0.95      0.95      1974

    accuracy                           0.95      4074
   macro avg       0.95      0.95      0.95      4074
weighted avg       0.95      0.95      0.95      4074

ETHNICITY - CONFUSION MATRIX¶

In [ ]:
#CONFUSION MATRIX - ETHNICITY
#0 - represents white, 1 - represents black,2 - represents asian,3 - represents indian and 4 - represents others
Y_pred_Ethn = np.argmax(pred[1],axis=1)
Y_true_ethnicity = np.argmax(y_ethnicity_test,axis = 1)
cm = confusion_matrix(Y_true_ethnicity,Y_pred_Ethn)
sns.heatmap(cm, annot=True, fmt='d', cbar=False, cmap='Greens');
target_names = ['white', 'black','asian','indian','others']
print(classification_report(Y_true_ethnicity, Y_pred_Ethn, target_names=target_names))
              precision    recall  f1-score   support

       white       0.86      0.95      0.90      1778
       black       0.89      0.86      0.88       799
       asian       0.95      0.82      0.88       511
      indian       0.89      0.81      0.85       697
      others       0.78      0.65      0.71       289

    accuracy                           0.87      4074
   macro avg       0.87      0.82      0.84      4074
weighted avg       0.87      0.87      0.87      4074

AGE - REGRESSION PLOT¶

In [ ]:
plt.figure(figsize=(8, 5))
plt.scatter(pred[2], y_age_test)
plt.plot(pred[2], pred[2], color='red')  # Add a regression line
plt.xlabel('Predicted Values')
plt.ylabel('Actual Values')
plt.title('Scatter Plot with Regression Line')
plt.show()

EXPERIMENTS¶

1. TEST ON NON MASKED IMAGES¶

In [ ]:
groundtruth = np.empty((0, 50, 50, 3), dtype=np.float32)
df_nonmasked = pd.DataFrame(columns=['Age', 'Gender', 'Ethnicity'])
for filename in os.listdir('/content/drive/MyDrive/new/'):
      img = cv2.imread('/content/drive/MyDrive/new/'+ str(filename))
      img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)      
      resized_img = cv2.resize(img, (50, 50), interpolation=cv2.INTER_LINEAR)
      flattened_img = resized_img.flatten()
      normalized_img = (flattened_img - flattened_img.min()) / (flattened_img.max() - flattened_img.min())
      normalized_img = normalized_img.reshape(1,50,50,3)
      groundtruth = np.append(groundtruth, normalized_img, axis=0)
      age, gender, ethnicity = filename.split("_")[:3]
      df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
truth_values = np.array(df_nonmasked[['Gender', 'Ethnicity', 'Age']])
# Segregating the labels into different arrays
y_gender_test_1 = truth_values[:,0]
y_ethnicity_test_1 = truth_values[:,1]
y_age_test_1 = truth_values[:,2]
y_gender_test_1 = y_gender_test_1.astype(int)
y_age_test_1 = y_age_test_1.astype(int)
y_ethnicity_1 = to_categorical(y_ethnicity_test_1)
y_ethnicity_test_1 = y_ethnicity_1
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-116-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
In [ ]:
pred = model3.predict(groundtruth)

#Accuracy in gender prediction
#Accuracy in ethnicity prediction
#Age mae in age prediction
test_loss,test_gender_loss, test_ethnicity_loss, test_age_loss, test_gender_acc,test_ethnicity_acc,test_age_mae = model3.evaluate(groundtruth, 
                                                                                            [y_gender_test_1, y_ethnicity_test_1, y_age_test_1], verbose=0)
print(f'\nTest gender accuracy: {test_gender_acc}')
print(f'\nTest ethnicity accuracy: {test_ethnicity_acc}')
print(f'\nTest age MAPE: {test_age_mae}')
1/1 [==============================] - 0s 19ms/step

Test gender accuracy: 0.949999988079071

Test ethnicity accuracy: 0.8999999761581421

Test age MAPE: 5.689876556396484
In [ ]:
i = 1
for filename in os.listdir('/content/drive/MyDrive/new/'):
      img = cv2.imread('/content/drive/MyDrive/new/'+ str(filename))
      img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
      age,gender,ethnicity = filename.split("_")[:3]
      print(i,".","GROUND TRUTH:")
      print("gender :", gender)
      print("ethnicity:", ethnicity)
      print("age:", age,"\n")
      # mg = np.mean(color_img, axis=2)
      resized_img = cv2.resize(img, (50, 50), interpolation=cv2.INTER_LINEAR)
      plt.figure(figsize=(2, 2))
      plt.imshow(img, interpolation='nearest')
      plt.grid(False)
      plt.xticks([])
      plt.yticks([])
      plt.show()
      flattened_img = resized_img.flatten()
      normalized_img = (flattened_img - flattened_img.min()) / (flattened_img.max() - flattened_img.min())
      normalized_img = normalized_img.reshape(1,50,50,3)
      x = normalized_img[0]
      pred_gender, pred_ethnicity, pred_age = model3.predict(x[np.newaxis])
      gender_acc = np.round(pred_gender)
      ethnicity_acc = np.mean(np.argmax(pred_ethnicity))
      age_mae = np.abs(pred_age)
      i = i+1
      print("PREDICTED:")
      print("gender :", gender_acc[0])
      print("ethnicity:", ethnicity_acc)
      print("age:", age_mae[0])
      print("---------------------------------------\n")
      
      
1 . GROUND TRUTH:
gender : 1
ethnicity: 3
age: 26 

1/1 [==============================] - 0s 414ms/step
PREDICTED:
gender : [1.]
ethnicity: 3.0
age: [24.531591]
---------------------------------------

2 . GROUND TRUTH:
gender : 0
ethnicity: 3
age: 27 

1/1 [==============================] - 0s 25ms/step
PREDICTED:
gender : [0.]
ethnicity: 3.0
age: [26.528475]
---------------------------------------

3 . GROUND TRUTH:
gender : 1
ethnicity: 3
age: 27 

1/1 [==============================] - 0s 26ms/step
PREDICTED:
gender : [1.]
ethnicity: 3.0
age: [29.71616]
---------------------------------------

4 . GROUND TRUTH:
gender : 0
ethnicity: 1
age: 42 

1/1 [==============================] - 0s 27ms/step
PREDICTED:
gender : [0.]
ethnicity: 1.0
age: [39.483364]
---------------------------------------

5 . GROUND TRUTH:
gender : 0
ethnicity: 2
age: 42 

1/1 [==============================] - 0s 30ms/step
PREDICTED:
gender : [0.]
ethnicity: 2.0
age: [43.36128]
---------------------------------------

6 . GROUND TRUTH:
gender : 0
ethnicity: 0
age: 45 

1/1 [==============================] - 0s 21ms/step
PREDICTED:
gender : [0.]
ethnicity: 0.0
age: [47.797993]
---------------------------------------

7 . GROUND TRUTH:
gender : 0
ethnicity: 1
age: 45 

1/1 [==============================] - 0s 23ms/step
PREDICTED:
gender : [0.]
ethnicity: 1.0
age: [44.021416]
---------------------------------------

8 . GROUND TRUTH:
gender : 0
ethnicity: 3
age: 45 

1/1 [==============================] - 0s 27ms/step
PREDICTED:
gender : [0.]
ethnicity: 1.0
age: [32.787228]
---------------------------------------

9 . GROUND TRUTH:
gender : 1
ethnicity: 0
age: 45 

1/1 [==============================] - 0s 27ms/step
PREDICTED:
gender : [1.]
ethnicity: 0.0
age: [35.698597]
---------------------------------------

10 . GROUND TRUTH:
gender : 0
ethnicity: 1
age: 46 

1/1 [==============================] - 0s 28ms/step
PREDICTED:
gender : [1.]
ethnicity: 1.0
age: [51.34391]
---------------------------------------

11 . GROUND TRUTH:
gender : 1
ethnicity: 0
age: 53 

1/1 [==============================] - 0s 32ms/step
PREDICTED:
gender : [1.]
ethnicity: 0.0
age: [55.681927]
---------------------------------------

12 . GROUND TRUTH:
gender : 0
ethnicity: 0
age: 55 

1/1 [==============================] - 0s 37ms/step
PREDICTED:
gender : [0.]
ethnicity: 0.0
age: [68.37868]
---------------------------------------

13 . GROUND TRUTH:
gender : 1
ethnicity: 2
age: 6 

1/1 [==============================] - 0s 29ms/step
PREDICTED:
gender : [1.]
ethnicity: 4.0
age: [12.162522]
---------------------------------------

14 . GROUND TRUTH:
gender : 1
ethnicity: 1
age: 70 

1/1 [==============================] - 0s 23ms/step
PREDICTED:
gender : [1.]
ethnicity: 0.0
age: [87.014786]
---------------------------------------

15 . GROUND TRUTH:
gender : 0
ethnicity: 1
age: 75 

1/1 [==============================] - 0s 28ms/step
PREDICTED:
gender : [0.]
ethnicity: 1.0
age: [73.76995]
---------------------------------------

16 . GROUND TRUTH:
gender : 0
ethnicity: 3
age: 75 

1/1 [==============================] - 0s 28ms/step
PREDICTED:
gender : [0.]
ethnicity: 3.0
age: [59.998203]
---------------------------------------

17 . GROUND TRUTH:
gender : 0
ethnicity: 0
age: 80 

1/1 [==============================] - 0s 20ms/step
PREDICTED:
gender : [0.]
ethnicity: 0.0
age: [92.188705]
---------------------------------------

18 . GROUND TRUTH:
gender : 1
ethnicity: 4
age: 9 

1/1 [==============================] - 0s 29ms/step
PREDICTED:
gender : [1.]
ethnicity: 3.0
age: [7.8003836]
---------------------------------------

19 . GROUND TRUTH:
gender : 1
ethnicity: 2
age: 44 

1/1 [==============================] - 0s 28ms/step
PREDICTED:
gender : [1.]
ethnicity: 2.0
age: [46.14566]
---------------------------------------

20 . GROUND TRUTH:
gender : 1
ethnicity: 0
age: 31 

1/1 [==============================] - 0s 20ms/step
PREDICTED:
gender : [1.]
ethnicity: 0.0
age: [30.50393]
---------------------------------------

2 .Making predictions on non superimposed occluded test data¶

In [ ]:
for filename in os.listdir('/content/drive/MyDrive/masked/'):
  path = '/content/drive/MyDrive/masked/'+str(filename)+'/'
  # print(path)
  for f in os.listdir(path):
      # print(f)
      img = cv2.imread(path+ str(f))
      img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
      print(filename[7:])
      # img = np.mean(color_img, axis=2)
      resized_img = cv2.resize(img, (50, 50), interpolation=cv2.INTER_LINEAR)
      age,gender,ethnicity = filename.split("_")[:3]
      print("GROUND TRUTH:")
      print("age:",age)
      print("gender :", gender)
      print("ethnicity:", ethnicity)
      plt.figure(figsize=(2, 2))
      plt.imshow(img, interpolation='nearest')
      plt.grid(False)
      plt.xticks([])
      plt.yticks([])
      plt.show()
      flattened_img = resized_img.flatten()
      normalized_img = (flattened_img - flattened_img.min()) / (flattened_img.max() - flattened_img.min())
      normalized_img = normalized_img.reshape(1,50,50,3)
      x = normalized_img[0]
      pred_gender, pred_ethnicity, pred_age = model3.predict(x[np.newaxis])
      gender_acc = np.round(pred_gender)
      ethnicity_acc = np.mean(np.argmax(pred_ethnicity))
      age_mae = np.abs(pred_age)
      print("PREDICTED:")
      print("age:", age_mae[0])
      print("gender :", gender_acc[0])
      print("ethnicity:", ethnicity_acc)

      print("---------------------------------------\n")
gary_peters_mask
GROUND TRUTH:
age: 59
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 28ms/step
PREDICTED:
age: [57.84138]
gender : [0.]
ethnicity: 0.0
---------------------------------------

brie_larson_wearing_sunglasses
GROUND TRUTH:
age: 30
gender : 1
ethnicity: 0
1/1 [==============================] - 0s 26ms/step
PREDICTED:
age: [16.880663]
gender : [1.]
ethnicity: 0.0
---------------------------------------

brad_pitt_wearing_sunglasses
GROUND TRUTH:
age: 52
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 28ms/step
PREDICTED:
age: [47.544365]
gender : [0.]
ethnicity: 0.0
---------------------------------------

ben_mendelsohn_wearing_sunglasses
GROUND TRUTH:
age: 63
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 21ms/step
PREDICTED:
age: [67.32037]
gender : [0.]
ethnicity: 0.0
---------------------------------------

anthony_mackie_wearing_sunglasses
GROUND TRUTH:
age: 44
gender : 0
ethnicity: 1
1/1 [==============================] - 0s 30ms/step
PREDICTED:
age: [49.08929]
gender : [1.]
ethnicity: 0.0
---------------------------------------

arnold_schwarzenegger_wearing_sunglasses
GROUND TRUTH:
age: 75
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 21ms/step
PREDICTED:
age: [74.319984]
gender : [0.]
ethnicity: 0.0
---------------------------------------

benedict_cumberbatch_wearing_sunglasses
GROUND TRUTH:
age: 41
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 28ms/step
PREDICTED:
age: [37.056385]
gender : [0.]
ethnicity: 0.0
---------------------------------------

adrien_brody_wearing_sunglasses
GROUND TRUTH:
age: 48
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 19ms/step
PREDICTED:
age: [46.97447]
gender : [0.]
ethnicity: 2.0
---------------------------------------

thomas_muller_wearing_mask
GROUND TRUTH:
age: 33
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 21ms/step
PREDICTED:
age: [48.1911]
gender : [0.]
ethnicity: 0.0
---------------------------------------

patty_murray_wearing_mask
GROUND TRUTH:
age: 72
gender : 0
ethnicity: 1
1/1 [==============================] - 0s 19ms/step
PREDICTED:
age: [75.04024]
gender : [0.]
ethnicity: 0.0
---------------------------------------

naomi_osaka_wearing_mask
GROUND TRUTH:
age: 26
gender : 1
ethnicity: 2
1/1 [==============================] - 0s 27ms/step
PREDICTED:
age: [27.747593]
gender : [1.]
ethnicity: 2.0
---------------------------------------

justin_trudeau_wearing_mask
GROUND TRUTH:
age: 52
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 29ms/step
PREDICTED:
age: [49.92174]
gender : [0.]
ethnicity: 0.0
---------------------------------------

kamala_haris_wearing_mask
GROUND TRUTH:
age: 60
gender : 1
ethnicity: 3
1/1 [==============================] - 0s 20ms/step
PREDICTED:
age: [63.821636]
gender : [1.]
ethnicity: 0.0
---------------------------------------

lewis_hamilton_wearing_mask
GROUND TRUTH:
age: 40
gender : 0
ethnicity: 1
1/1 [==============================] - 0s 19ms/step
PREDICTED:
age: [41.370056]
gender : [0.]
ethnicity: 0.0
---------------------------------------

gary_peters_wearing_sunglasses
GROUND TRUTH:
age: 64
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 26ms/step
PREDICTED:
age: [61.57495]
gender : [1.]
ethnicity: 0.0
---------------------------------------

jean_castex_wearing_mask
GROUND TRUTH:
age: 60
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 20ms/step
PREDICTED:
age: [65.42437]
gender : [0.]
ethnicity: 0.0
---------------------------------------

jayson_tatum_wearing_mask
GROUND TRUTH:
age: 23
gender : 0
ethnicity: 1
1/1 [==============================] - 0s 32ms/step
PREDICTED:
age: [14.43739]
gender : [0.]
ethnicity: 1.0
---------------------------------------

immanuel_macron_wearing_mask
GROUND TRUTH:
age: 46
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 29ms/step
PREDICTED:
age: [45.433857]
gender : [0.]
ethnicity: 0.0
---------------------------------------

george_russell_wearing_mask
GROUND TRUTH:
age: 26
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 27ms/step
PREDICTED:
age: [30.939505]
gender : [0.]
ethnicity: 1.0
---------------------------------------

debbie_stabenow_wearing_mask
GROUND TRUTH:
age: 73
gender : 1
ethnicity: 1
1/1 [==============================] - 0s 28ms/step
PREDICTED:
age: [72.90281]
gender : [0.]
ethnicity: 0.0
---------------------------------------

diego_schwartzman_wearing_mask
GROUND TRUTH:
age: 33
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 22ms/step
PREDICTED:
age: [21.641476]
gender : [0.]
ethnicity: 1.0
---------------------------------------

bill_gates_wearing_mask
GROUND TRUTH:
age: 67
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 29ms/step
PREDICTED:
age: [71.82434]
gender : [0.]
ethnicity: 0.0
---------------------------------------

bruno_fernandes_wearing_mask
GROUND TRUTH:
age: 45
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 22ms/step
PREDICTED:
age: [44.17545]
gender : [0.]
ethnicity: 0.0
---------------------------------------

carlos_sainz_wearing_mask
GROUND TRUTH:
age: 30
gender : 0
ethnicity: 4
1/1 [==============================] - 0s 23ms/step
PREDICTED:
age: [55.401897]
gender : [0.]
ethnicity: 1.0
---------------------------------------

charles_leclerc_wearing_mask
GROUND TRUTH:
age: 25
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 25ms/step
PREDICTED:
age: [31.727945]
gender : [0.]
ethnicity: 0.0
---------------------------------------

chuck_schumer_wearing_mask
GROUND TRUTH:
age: 71
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 20ms/step
PREDICTED:
age: [72.32113]
gender : [1.]
ethnicity: 0.0
---------------------------------------

alexander_zverev_wearing_mask
GROUND TRUTH:
age: 36
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 26ms/step
PREDICTED:
age: [27.507105]
gender : [0.]
ethnicity: 0.0
---------------------------------------

evangeline_lilly_wearing_sunglasses
GROUND TRUTH:
age: 45
gender : 1
ethnicity: 0
1/1 [==============================] - 0s 28ms/step
PREDICTED:
age: [48.942722]
gender : [1.]
ethnicity: 0.0
---------------------------------------

emily_blunt_wearing_sunglasses
GROUND TRUTH:
age: 38
gender : 1
ethnicity: 0
1/1 [==============================] - 0s 27ms/step
PREDICTED:
age: [41.768288]
gender : [1.]
ethnicity: 0.0
---------------------------------------

chris_hemsworth_wearing_sunglasses
GROUND TRUTH:
age: 32
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 22ms/step
PREDICTED:
age: [22.584406]
gender : [0.]
ethnicity: 0.0
---------------------------------------

christian_bale_wearing_sunglasses
GROUND TRUTH:
age: 50
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 21ms/step
PREDICTED:
age: [58.322582]
gender : [0.]
ethnicity: 0.0
---------------------------------------

bruce_willis_wearing_sunglasses
GROUND TRUTH:
age: 68
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 20ms/step
PREDICTED:
age: [69.92977]
gender : [0.]
ethnicity: 0.0
---------------------------------------

The code in this project aims to develop custom CNN architectures for accurate prediction of gender, age, and ethnicity from partially occluded facial images. The architectures are based on AlexNet, ResNet, and VGG models, and they are designed for multi-task deep learning. The code includes data preprocessing, model building, training, and evaluation steps.

SOURCES AND REFERENCES:

1) UTK FACE DATASET - https://www.kaggle.com/datasets/jangedoo/utkface-new

2) MASK THE FACE( To superimpose masks) - https://github.com/aqeelanwar/MaskTheFace

3) RESNET VARIATION ARCHITECTURE - https://arxiv.org/abs/2201.03002

4) CLASS ACTIVATION MAPS - https://towardsdatascience.com/demystifying-convolutional-neural-networks-using-class-activation-maps-fe94eda4cef1

5) https://stackoverflow.com/

6) https://opencv.org/

7) https://keras.io/about/

8) https://matplotlib.org/